function [alpha,theta,solution,minr,t,maxerr]=...
    gganders2(MI,SG,J,tmax,stopCond,t,alpha,theta)
% GGANDERS2 solves Generalized Anderson's task, generalized gradient.
% [alpha,theta,solution,minr,t,maxerr]=...
%    gganders2(MI,SG,J,tmax,stopCond,t,alpha,theta)
%
% GGANDERS2 is an implementation of the algorithm that uses the theorem on 
%   generalized gradient optimization for solving the Generalized Anderson's 
%   task (GAT).
% 
%   The GAT finds a separating hyperplane (alpha'*x = theta) between two 
%   classes such that the found solution minimizes the probability of 
%   misclassification. Both classes are described in terms of conditional 
%   probability density functions p(x|k), where x is an observation and k is 
%   a class identifier (k in {1,2}). The p(x|k) of both classes is normally 
%   distributed, but its true parameters are not known; only a finite set 
%   of possible parameters is known.
%
%   The algorithm finds alpha, theta which maximize the criterion minr.
%   There are three possible stop conditions:
%     1. stopCond is [1x1]. The algorithm halts when the change of the 
%     criterion value in two consecutive steps is less than the prescribed 
%     limit stopCond, i.e.
%           abs(minr(t)-minr(t-1)) < stopCond
%
%     2. stopCond is [1x2] = [deltaR,stopT]. The algorithm halts when over
%     the last stopT steps the change of the best solution is less than the
%     prescribed limit deltaR, i.e.
%           max minR(t')  -   max  minR(t')  < deltaR
%         t' <= t          t' <= t-stopT
%      
%     OR 
%     3. The algorithm halts when the number of performed iterations exceeds
%     the prescribed limit tmax, regardless of stopCond, i.e. 
%          t >= tmax
%
% Input:
% (Notation: K is the number of supplied {mi,sigma} pairs, N is the dimension
%  of the feature space.)
%
% GGANDERS2(MI,SIGMA,J,tmax,stopCond)
%   MI [NxK] contains K column vectors of mean values MI=[mi_1,mi_2,...,mi_K],
%      where mi_i is the i-th column vector N-by-1. 
%   SIGMA [Nx(K*N)] contains K covariance matrices,
%      SIGMA=[sigma_1,sigma_2,...,sigma_K], where sigma_i is the i-th matrix N-by-N.
%   J [1xK] is a vector of class labels J=[label_1,label_2,...,label_K], where
%      label_i is an integer 1 or 2 according to which class the pair 
%      {mi_i,sigma_i} describes.
%   tmax [1x1] is the maximal number of steps of the algorithm. Default is inf 
%      (which disables this stop condition).
%   stopCond [1x1] or [1x2] see comments above.
%
% GGANDERS2(MI,SIGMA,J,tmax,stopCond,t,alpha,theta) begins from the state given by
%   t [1x1] the starting step number.
%   alpha [Nx1], theta [1x1] are state variables of the algorithm.
%
% Returns:
%   alpha [Nx1] is the normal vector of the found separating hyperplane.
%   theta [1x1] is the threshold of the separating hyperplane (alpha'*x=theta).
%   solution [1x1] is equal to -1 if a solution does not exist,
%      is equal to 0 if a solution is not found,
%      is equal to 1 if it is found.
%   minr [1x1] is the radius of the smallest ellipsoid, corresponding to the 
%      quality of the found solution.
%   t [1x1] is the number of steps the algorithm performed.
%   maxerr [1x1] is the probability of bad classification.
%
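% Example:
%   An illustrative call (the data below is made up only to show the 
%   calling convention): two pairs {mi_i,sigma_i} per class in 2D.
%     MI    = [[0;0],[1;1],[5;5],[6;4]];      % K=4 mean vectors, [NxK]
%     SIGMA = [eye(2),eye(2),eye(2),eye(2)];  % K covariances, [Nx(K*N)]
%     J     = [1 1 2 2];                      % class label of each pair
%     [alpha,theta,solution,minr,t,maxerr]=gganders2(MI,SIGMA,J,1000,1e-6);
%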
% See also OANDERS, EANDERS, GANDERS, GANDERS2
%

% Statistical Pattern Recognition Toolbox, Vojtech Franc, Vaclav Hlavac
% (c) Czech Technical University Prague, http://cmp.felk.cvut.cz
% Written by Vojtech Franc (diploma thesis) 04.11.1999, 6.5.2000
% Modifications
% 24. 6.00 V. Hlavac, comments polished.
% 20.10.00 V.Franc

% default arguments setting
if nargin < 3,
  error('Not enough input arguments.');
end
if nargin < 4,
  tmax=inf;
end

%%% Gets stop condition
if nargin < 5,
   stopCond = 0;
end
if length(stopCond)==2,
  stopT=stopCond(2);
else
  stopT=1;
end
deltaR=stopCond(1);

%%% Starts from the prescribed state
if nargin < 6,
   t=0;
end

% transform to homogeneous coordinates
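% (ctransf is a toolbox helper; it absorbs the threshold theta into alpha
% and negates the class 2 parameters, so the two-class condition
% alpha'*x >< theta becomes the single condition alpha'*x > 0 for all
% transformed means; see ctransf.m for details.)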
if nargin < 8,
   [alpha,MI,SG]=ctransf(0,0,MI,J,SG);
else
   [alpha,MI,SG]=ctransf(alpha,theta,MI,J,SG);
end
%%%

% get dimension N and number of distributions K
N=size(MI,1);
K=size(MI,2);

% default value of the solution flag
solution=0;

if t==0,
   % First step
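   % csvm (a toolbox helper) searches for an initial alpha with
   % alpha'*mi > 0 for all transformed mean vectors; if none exists, the
   % means cannot be separated and the task has no solution at all.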
   [alpha,alphaexists]=csvm(MI);

   if alphaexists==0,
      % Solution does not exist.
      alpha = zeros(N,1); minr=0; theta=0;
      solution=-1;
      return;
   else
      tmax=tmax-1;
      t=1;
   end
end

% computes current quality of the solution
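% The criterion is minr = min_i (alpha'*mi_i)/sqrt(alpha'*sigma_i*alpha).
% alpha'*SG is the 1x(K*N) row [alpha'*sigma_1,...,alpha'*sigma_K];
% reshaping it to NxK, transposing and multiplying by alpha evaluates all
% K quadratic forms alpha'*sigma_i*alpha at once.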
[minrs,minri]=min( (alpha'*MI)./sqrt( reshape(alpha'*SG,N,K)'*alpha )' );
minr=minrs(1);
oldminr=minr;     % criterion value in the previous step 
bestminr=minr;    % the best solution so far, criterion value
bestalpha=alpha;  % the best solution so far, weight vector
queueBestMinR=[]; % queue of the best values over the last stopT steps

t0=0;   % counter of iterations performed in this call of the function
% t is the total number of iterations, i.e. t(end of call)=t(begin of call)+t0

%%% Main algorithm cycle %%%%%%%%%
while solution==0 & tmax > 0,
   tmax = tmax-1;

   % Find contact point x0.
   % computes minr
   [minrs,minri]=min( (alpha'*MI)./sqrt( reshape(alpha'*SG,N,K)'*alpha )' );
   minr=minrs(1);
   i=minri(1);

   % compute x0
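   % x0 minimizes alpha'*x over the ellipsoid
   % {x: (x-mi_i)'*inv(sigma_i)*(x-mi_i) <= minr^2}, i.e. it is the contact
   % point of the worst (smallest) ellipsoid with the separating hyperplane.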
   x0=MI(:,i)-(minr/sqrt(alpha'*SG(:,(i-1)*N+1:i*N)*alpha))*SG(:,(i-1)*N+1:i*N)*alpha;

   % compute new alpha
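   % Generalized gradient step: the unit vector x0/norm(x0) is a generalized
   % gradient of the (non-differentiable) criterion at alpha, so stepping
   % towards the contact point aims to increase the worst-case distance minr.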
   alpha=alpha+x0/norm(x0);

   oldminr=minr;

   % store the best solution so far
   if bestminr<=minr,
      bestalpha=alpha;
      bestminr=minr;
      bestt=t;
   end

   t=t+1;
   t0=t0+1;

   %%%% Stop criterion %%%%%%%%%%%%%%%%
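   % queueBestMinR keeps bestminr over the last stopT steps, so its first
   % entry is the best value achieved up to step t-stopT; stop condition 2
   % (see help above) compares it with the current best value.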
   if t0 > stopT,
      if (bestminr-queueBestMinR(1)) < deltaR,
        solution = 1;
        t=t-1;
      end

      % a queue of the best solutions
      queueBestMinR=[queueBestMinR(2:end),bestminr];  
   else
      queueBestMinR=[queueBestMinR,bestminr];
   end
   
end

%%%% Conclusion of the algorithm %%%%%

% gets the best solution so far
alpha=bestalpha;
minr=bestminr;

% transform back to the original space 
[alpha,theta]=ictransf(alpha);

% computes probability of bad classification
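% minr is the distance of the hyperplane from the worst pair, measured in
% the metric of its covariance, so the error probability is the Gaussian
% tail 1-Phi(minr). Note cdf() needs the Statistics Toolbox;
% 0.5*erfc(minr/sqrt(2)) would be an equivalent base-MATLAB expression.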
maxerr=1-cdf('norm',minr,0,1);